home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Languguage OS 2
/
Languguage OS II Version 10-94 (Knowledge Media)(1994).ISO
/
language
/
awe
/
awe-full.lha
/
Awe2
/
DoNotUseThisSrc
/
SharedMalloc-real.cc
< prev
next >
Wrap
C/C++ Source or Header
|
1990-08-08
|
10KB
|
412 lines
// This may look like C code, but it is really -*- C++ -*-
//
// Copyright (C) 1988 University of Illinois, Urbana, Illinois
//
// written by Dirk Grunwald (grunwald@cs.uiuc.edu)
//
//
// Allocator.c: based loosely on...
//
// malloc.c (Caltech) 2/21/82
// Chris Kingsley, kingsley@cit-20.
//
// $Header: /foobar/grunwald/Awe2/NewSrc/RCS/SharedMalloc-real.cc,v 4.1 89/10/18 13:28:22 grunwald Exp Locker: grunwald $
//
// This is a very fast storage allocator. It allocates blocks of a small
// number of different sizes, and keeps free lists of each size. In this
// implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
//
#define NDEBUG
#ifdef __GNUG__
# pragma implementation
#endif
#include <stdio.h>
#include <stream.h>
#include "Debug.h"
#include "assert.h"
#include "CpuMultiplexor.h"
#ifdef __GNUG__
extern "C" {
#endif
void *sbrk(int);
void exit(int x = 0);
void *malloc( unsigned );
void free(void *);
void perror(const char*);
#ifdef __GNUG__
};
#endif
const int NOTREACHED = 0;
extern int CpuMuxDebugFlag;
//
// calloc and cfree are defined in terms of malloc.
// alloca allocates space from the current stack.
//
#include "assert.h"
#include "SpinLock.h"
//
// Allocator.h: based loosely on...
//
// malloc.c (Caltech) 2/21/82
// Chris Kingsley, kingsley@cit-20.
//
// $Header: /foobar/grunwald/Awe2/NewSrc/RCS/SharedMalloc-real.cc,v 4.1 89/10/18 13:28:22 grunwald Exp Locker: grunwald $
//
//
//
// Below is an allocator subclass based loosely on..
//
// malloc.c (Caltech) 2/21/82
// Chris Kingsley, kingsley@cit-20.
//
// This is a very fast storage allocator. It allocates blocks of a small
// number of different sizes, and keeps free lists of each size. In this
// implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
//
//
// The overhead on a block is at least 4 bytes. When free, this space
// contains a pointer to the next free block, and the bottom two bits must
// be zero. When in use, the first byte is set to MAGIC, and the second
// byte is the size index. The remaining bytes are for alignment.
// If range checking is enabled and the size of the block fits
// in two bytes, then the top two bytes hold the size of the requested block
// plus the range checking words, and the header word MINUS ONE.
//
// Per-block bookkeeping header.  While a block sits on a free list the
// first word doubles as the list link; while the block is allocated it
// holds the magic byte, the bucket index, the recorded size, and the
// range-check magic word.
union overhead {
union overhead * ov_next; // when free
struct {
unsigned char ovu_magic; // magic number
unsigned char ovu_index; // bucket #
unsigned short ovu_size; // actual block size
unsigned int ovu_rmagic; // range magic number
} ovu;
};
// Shorthand accessors for the fields of the ovu struct above.
#define ov_magic ovu.ovu_magic
#define ov_index ovu.ovu_index
#define ov_size ovu.ovu_size
#define ov_rmagic ovu.ovu_rmagic
#define MAGIC 0xff /* magic # on accounting info */
#define RMAGIC 0x55555555 /* magic # on range info */
#define RSLOP sizeof( unsigned int ) /* bytes reserved for the trailing RMAGIC */
//
// nextf[i] is the pointer to the next free block of size 2^(i+3). The
// smallest allocatable block is 8 bytes. The overhead information
// precedes the data area returned to the user.
//
int const NumberOfBuckets = 30;
// Power-of-two free-list allocator: bucket i holds free blocks of size
// 2^(i+3), each free list guarded by its own spin lock.  Memory is
// obtained from the system with sbrk() under topLock.
class BucketAllocator {
protected:
union overhead * nextf[ NumberOfBuckets ]; // free list head per size class
SpinLock bucketLock[ NumberOfBuckets ]; // one lock per free list
SpinLock topLock; // guards top and sbrk growth
char * top; // first unused byte of cached heap space
char sbrkDisabled; // nonzero: morecore may no longer call sbrk
char initialized; // nonzero once initialize() has run
int morecore( int bucket ); // refill one bucket from the heap
void initialize();
public:
BucketAllocator();
~BucketAllocator();
void * allocate( unsigned nbytes );
void free( void * cp );
void disableFurtherBreaks();
};
// The single, shared allocator instance behind malloc()/free().
BucketAllocator HardwareMemoryAllocator;

// All setup work lives in initialize(), which allocate() can also run
// lazily for callers that arrive before static construction.
BucketAllocator::BucketAllocator()
{
    initialize();
}
// Once this flag is set, morecore() refuses to extend the heap and the
// allocator lives off whatever space is already cached.
void
BucketAllocator::disableFurtherBreaks()
{
    sbrkDisabled = 1;
}
// Global convenience wrapper over the shared allocator instance.
void disableFurtherBreaks()
{
    HardwareMemoryAllocator.disableFurtherBreaks();
}
// One-time setup: empty every free list and point `top' at the end of
// the program's data segment.  Idempotent — repeat calls are no-ops.
void
BucketAllocator::initialize()
{
    if (initialized) return;

    // Start with every size bucket empty.
    for (int bucket = 0; bucket < NumberOfBuckets; bucket++) {
        nextf[bucket] = 0;
    }

    // The heap grows upward from the linker-provided symbol `end',
    // which marks the end of the program's static data.
    extern int end;
    top = (char *) &end;

    sbrkDisabled = 0;
    initialized = 1;
}
//
// Deleting the allocator doesn't free up your memory, it just
// makes us forget about it. However, this is only done when
// you call exit, so who cares.
//
BucketAllocator::~BucketAllocator()
{
    // Intentionally empty: space obtained via sbrk() is never handed
    // back to the system (see the comment above).
}
//
// Hand out one block with room for at least the requested nbytes.
// Returns 0 when the heap is completely exhausted.  The caller
// receives the address just past the bookkeeping header.
//
void *
BucketAllocator::allocate( unsigned nbytes )
{
    // Late initialization covers callers that run before the static
    // constructor of HardwareMemoryAllocator.
    if (!initialized) {
        initialize();
    }
    union overhead * p;
    int bucket = 0;
    unsigned shiftr;
#ifndef NDEBUG
    if (CpuMuxDebugFlag) {
        cerr << "BucketAllocator::allocate( " << nbytes;
        cerr << " ) for top = " << hex(long(top)) << "\n";
    }
#endif
    //
    // Convert amount of memory requested into
    // closest block size stored in hash buckets
    // which satisfies request.  Account for
    // space used per block for accounting.
    //
    nbytes += sizeof( union overhead ) + RSLOP;
    nbytes = ( nbytes + 3 ) &~ 3;
    shiftr = ( nbytes - 1 ) >> 2;
    // apart from this loop, this is O(1)
    while( shiftr >>= 1 )
        bucket++;
    Debug( "BucketAllocator::allocate: nbytes=%x, shiftr=%x, bucket=%x\n",
           nbytes, shiftr, bucket );
    assert( bucket < NumberOfBuckets );
    //
    // If nothing in hash bucket right now, request more memory from
    // the system.
    // This is probably pathological (?) and can lead to one processor
    // always allocating memory for everybody else, but hopefully the
    // locks will make it somewhat fairer.
    //
    Debug( "BucketAllocator::allocate: waiting on bucket %d lock\n",
           bucket );
    bucketLock[bucket].reserve();
    while( nextf[ bucket ] == 0 ) {
        Debug( "BucketAllocator::allocate: filling bucket %d\n",
               bucket );
        bucketLock[bucket].release();
        int refilled = morecore( bucket );
        //
        // BUG FIX: always re-acquire the bucket lock.  The old code
        // broke out of this loop after a failed morecore() WITHOUT
        // holding the lock, so the failure path below released a lock
        // this caller never reserved, and the nextf[] test and pop
        // below ran without mutual exclusion.
        //
        bucketLock[bucket].reserve();
        if( ! refilled )
            break;
    }
    Debug( "BucketAllocator::allocate: nextf[bucket] = %x\n",
           nextf[bucket] );
    if( nextf[bucket] == 0 ) {
        fprintf(stderr, "BucketAllocator::allocate REALLY out of memory\n" );
        bucketLock[bucket].release();
        return( 0 );
    }
    //
    // Grab the next entry in the bucket.
    //
    p = nextf[ bucket ];
    Debug( "BucketAllocator::allocate: p = %x\n", p );
    //
    // remove from linked list
    //
    nextf[ bucket ] = nextf[ bucket ]->ov_next;
    bucketLock[bucket].release();
    p->ov_magic = MAGIC;
    p->ov_index = bucket;
    //
    // Record allocated size of block and
    // bound space with magic numbers.
    // ov_size is an unsigned short, so the size is recorded only when
    // nbytes - 1 fits in sixteen bits.
    //
    if( nbytes <= 0x10000 )
        p->ov_size = nbytes - 1;
    p->ov_rmagic = RMAGIC;
    *((unsigned int *) ((char *) p + nbytes - RSLOP)) = RMAGIC;
    Debug( "BucketAllocator::allocate: returning %A\n", (char *) (p + 1) );
    return( (char *) (p + 1) );
}
//
// Get more memory into a bucket. Also prefetch the memory.
//
//
// Get more memory into a bucket.  Also prefetch the memory.
// Returns nonzero on success, 0 when no further memory is available.
//
int
BucketAllocator::morecore( int bucket )
{
    union overhead * op;
    int rnu; // 2^rnu bytes will be requested
    int nblks; // become nblks blocks of the desired size
    int siz;
    Debug( "BucketAllocator::morecore( bucket:%d )\n", bucket );
    //
    // take PAGESIZE (4k) unless the block is bigger than that.
    // (rnu is always >= bucket + 3 here, so the old unreachable
    // "if (rnu < bucket) rnu = bucket;" adjustment has been dropped)
    //
    rnu = ( bucket <= 9 ) ? 12 : bucket + 3;
    nblks = 1 << ( rnu - ( bucket + 3 ) ); // how many blocks to get
    Debug( "BucketAllocator::morecore(): getting %d bytes\n", 1 << rnu );
    //
    // See if there is room left in the cached space for the request.
    //
    topLock.reserve();
    int nbytes = 1 << rnu;
    char *highest = (char *) sbrk(0);
    if ( ( top + nbytes ) > highest ) {
        if ( sbrkDisabled ) {
            cerr << "BucketAllocator::morecore: out of memory\n";
            cerr << "top = " << hex(long(top)) << "\n";
            cerr << "highest = " << hex(long(highest)) << "\n";
            cerr << "sbrk(0) = " << hex(long(sbrk(0))) << "\n";
            topLock.release();
            return( 0 );
        }
        else {
            //
            // Get more space & reduce the number of system calls.
            //
            highest = (char *) sbrk(nbytes * 2);
            //
            // BUG FIX: sbrk() yields (char *) -1 on failure; the old
            // code ignored that and went on to hand out addresses the
            // system never mapped.
            //
            if ( highest == (char *) -1 ) {
                perror( "BucketAllocator::morecore: sbrk" );
                topLock.release();
                return( 0 );
            }
        }
    }
    op = (union overhead *) top;
    top += nbytes;
    topLock.release();
    //
    // Round up to minimum allocation size boundary
    // and deduct from block count to reflect.
    //
    if( (int) op & 7 ) {
        op = (union overhead *) (((int)op + 8) &~ 7);
        nblks--;
    }
    Debug( "BucketAllocator::morecore(): adjusted op = %x\n", op );
    //
    // Add new memory allocated to that on
    // free list for this hash bucket.
    //
    siz = 1 << ( bucket + 3 );
    union overhead * block = op;
    Debug( "BucketAllocator::morecore(): nblks = %d size = %d\n",
           nblks, siz );
    while( --nblks > 0 ) {
        block->ov_next = (union overhead *) ( (char *) block + siz );
        block = (union overhead *) ( (char *) block + siz );
    }
    Debug( "BucketAllocator::morecore(): adding new memory to bucket %d\n",
           bucket );
    bucketLock[bucket].reserve();
    block->ov_next = nextf[ bucket ]; // terminate the list with
                                      // anything that was already
                                      // there..
    nextf[ bucket ] = op;
    bucketLock[bucket].release();
    return( 1 );
}
//
// Return memory to the free pool.
// Currently this does not return the physical pages associated with the
// memory, but it should.
//
// Push a block back onto the free list of its size bucket.  The block's
// header (written by allocate) tells us which bucket it came from.  The
// physical pages are never returned to the system.
void
BucketAllocator::free( void * cp )
{
    Debug( "BucketAllocator::free( %x )\n", cp );
    if( cp == 0 )
        return;
    // Step back over the header that precedes the user's data.
    union overhead * hdr =
        (union overhead *) ( (char *) cp - sizeof( union overhead ) );
    assert( hdr->ov_magic == MAGIC ); // make sure it was in use
    assert( hdr->ov_rmagic == RMAGIC );
    if( hdr->ov_index <= 13 )
        assert( *(unsigned int *)((char *)hdr + hdr->ov_size + 1 - RSLOP) == RMAGIC );
    assert( hdr->ov_index < NumberOfBuckets );
    int bucket = hdr->ov_index;
    bucketLock[bucket].reserve();
    hdr->ov_next = nextf[ bucket ];
    nextf[ bucket ] = hdr;
    bucketLock[bucket].release();
}
#ifdef __GNUG__
extern "C" void *malloc( unsigned size)
#else
void *malloc( unsigned size)
#endif
{
    // C-compatible entry point: forward to the shared allocator.
    return HardwareMemoryAllocator.allocate( size );
}
#ifdef __GNUG__
extern "C" void free(void * ptr)
#else
void free(void * ptr)
#endif
{
    // C-compatible entry point: forward to the shared allocator.
    HardwareMemoryAllocator.free( ptr );
}